}
int xc_shadow_control(int xc_handle,
- u64 domid,
+ u32 domid,
unsigned int sop,
- unsigned long *dirty_bitmap,
- unsigned long pages,
- unsigned long *fault_count,
- unsigned long *dirty_count)
+ unsigned long *dirty_bitmap,
+ unsigned long pages,
+ xc_shadow_control_stats_t *stats )
{
int rc;
dom0_op_t op;
rc = do_dom0_op(xc_handle, &op);
- if ( fault_count )
- *fault_count = op.u.shadow_control.fault_count;
- if ( dirty_count )
- *dirty_count = op.u.shadow_control.dirty_count;
+ if(stats) memcpy(stats, &op.u.shadow_control.stats,
+ sizeof(xc_shadow_control_stats_t));
- if ( rc == 0 )
- return op.u.shadow_control.pages;
- else
- return rc;
+ return (rc == 0) ? op.u.shadow_control.pages : rc;
}
int xc_domain_setname(int xc_handle,
return (new->tv_sec * 1000000) + new->tv_usec;
}
- static long long tvdelta( struct timeval *new, struct timeval *old )
+ static long long llgettimeofday()
+ {
+ struct timeval now;
+ gettimeofday(&now, NULL);
+ return tv_to_us(&now);
+ }
+
+ static long long tv_delta( struct timeval *new, struct timeval *old )
{
return ((new->tv_sec - old->tv_sec)*1000000 ) +
- (new->tv_usec - old->tv_usec);
+ (new->tv_usec - old->tv_usec);
}
- static int track_cpu_usage( int xc_handle, u32 domid, int faults,
- int pages_sent, int pages_dirtied, int print )
-static int print_stats( int xc_handle, u64 domid,
++static int print_stats( int xc_handle, u32 domid,
+ int pages_sent, xc_shadow_control_stats_t *stats,
+ int print )
{
static struct timeval wall_last;
static long long d0_cpu_last;
}
-static int analysis_phase( int xc_handle, u64 domid,
++static int analysis_phase( int xc_handle, u32 domid,
+ int nr_pfns, unsigned long *arr )
+ {
+ long long start, now;
+ xc_shadow_control_stats_t stats;
+
+ start = llgettimeofday();
+
+ while(0)
+ {
+ int i;
+
+ xc_shadow_control( xc_handle, domid,
+ DOM0_SHADOW_CONTROL_OP_CLEAN2,
+ arr, nr_pfns, NULL);
+ printf("#Flush\n");
+ for(i=0;i<100;i++)
+ {
+ usleep(10000);
+ now = llgettimeofday();
+ xc_shadow_control( xc_handle, domid,
+ DOM0_SHADOW_CONTROL_OP_PEEK,
+ NULL, 0, &stats);
+
+ printf("now= %lld faults= %ld dirty= %ld dirty_net= %ld dirty_block= %ld\n",
+ ((now-start)+500)/1000,
+ stats.fault_count, stats.dirty_count,
+ stats.dirty_net_count, stats.dirty_block_count );
+
+ }
+
+
+ }
+
+
+ return -1;
+ }
+
int xc_linux_save(int xc_handle,
- u64 domid,
+ u32 domid,
- unsigned int flags,
- int (*writerfn)(void *, const void *, size_t),
- void *writerst )
+ unsigned int flags,
+ int (*writerfn)(void *, const void *, size_t),
+ void *writerst )
{
dom0_op_t op;
int rc = 1, i, j, k, last_iter, iter = 0;
}
/* Ensure that the domain exists, and that it is stopped. */
- if ( xc_domain_pause(xc_handle, domid) )
+
- if ( xc_domain_stop_sync( xc_handle, domid, &op, &ctxt ) )
++ if ( xc_domain_pause( xc_handle, domid, &op, &ctxt ) )
{
- PERROR("Could not pause domain");
- goto out;
- PERROR("Could not sync stop domain");
++ PERROR("Could not pause domain");
+ goto out;
}
memcpy(name, op.u.getdomaininfo.name, sizeof(name));
if( live )
{
- if ( xc_shadow_control( xc_handle, domid,
- DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY,
- NULL, 0, NULL, NULL ) < 0 )
- {
- ERROR("Couldn't enable shadow mode");
- goto out;
- }
-
- if ( xc_domain_unpause(xc_handle, domid) < 0 )
- {
- ERROR("Couldn't unpause domain");
- goto out;
- }
-
- last_iter = 0;
- sent_last_iter = 1<<20; /* 4GB's worth of pages */
+ if ( xc_shadow_control( xc_handle, domid,
+ DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY,
+ NULL, 0, NULL ) < 0 )
+ {
+ ERROR("Couldn't enable shadow mode");
+ goto out;
+ }
+
- if ( xc_domain_start( xc_handle, domid ) < 0 )
++ if ( xc_domain_unpause( xc_handle, domid ) < 0 )
+ {
- ERROR("Couldn't restart domain");
++ ERROR("Couldn't unpause domain");
+ goto out;
+ }
+
+ last_iter = 0;
+ sent_last_iter = 1<<20; // 4GB's worth of pages
}
else
- last_iter = 1;
+ last_iter = 1;
+ /* calculate the power of 2 order of nr_pfns, e.g.
+ 15->4 16->4 17->5 */
+ for( i=nr_pfns-1, order_nr=0; i ; i>>=1, order_nr++ );
/* Setup to_send bitmap */
{
while(1)
{
- unsigned int prev_pc, sent_this_iter, N, batch;
+ unsigned int prev_pc, sent_this_iter, N, batch;
+
+ iter++;
+ sent_this_iter = 0;
+ skip_this_iter = 0;
+ prev_pc = 0;
+ N=0;
+
+ verbose_printf("Saving memory pages: iter %d 0%%", iter);
+
+ while( N < nr_pfns )
+ {
+ unsigned int this_pc = (N * 100) / nr_pfns;
+
+ if ( (this_pc - prev_pc) >= 5 )
+ {
+ verbose_printf("\b\b\b\b%3d%%", this_pc);
+ prev_pc = this_pc;
+ }
+
+            /* slightly wasteful to peek the whole array every time,
+ but this is fast enough for the moment. */
+
+ if ( !last_iter &&
+ xc_shadow_control(xc_handle, domid,
+ DOM0_SHADOW_CONTROL_OP_PEEK,
+ to_skip, nr_pfns, NULL) != nr_pfns )
+ {
+ ERROR("Error peeking shadow bitmap");
+ goto out;
+ }
+
+
+ /* load pfn_type[] with the mfn of all the pages we're doing in
+ this batch. */
+
+ for( batch = 0; batch < BATCH_SIZE && N < nr_pfns ; N++ )
+ {
+ int n = permute(N, nr_pfns, order_nr );
+
+ if(0 && debug)
+ fprintf(stderr,"%d pfn= %08lx mfn= %08lx %d [mfn]= %08lx\n",
+ iter, n, live_pfn_to_mfn_table[n],
+ test_bit(n,to_send),
+ live_mfn_to_pfn_table[live_pfn_to_mfn_table[n]&0xFFFFF]);
+
+ if (!last_iter && test_bit(n, to_send) && test_bit(n, to_skip))
+ skip_this_iter++; // stats keeping
+
+ if (! ( (test_bit(n, to_send) && !test_bit(n, to_skip)) ||
+ (test_bit(n, to_send) && last_iter) ||
+ (test_bit(n, to_fix) && last_iter) ) )
+ continue;
+
+ /* we get here if:
+ 1. page is marked to_send & hasn't already been re-dirtied
+ 2. (ignore to_skip in last iteration)
+ 3. add in pages that still need fixup (net bufs)
+ */
+
+ pfn_batch[batch] = n;
+ pfn_type[batch] = live_pfn_to_mfn_table[n];
+
+ if( pfn_type[batch] == 0x80000004 )
+ {
+                    /* not currently in pseudo-physical map -- set bit
+ in to_fix that we must send this page in last_iter
+ unless its sent sooner anyhow */
+
+ set_bit( n, to_fix );
+ if( iter>1 )
+ DDPRINTF("Urk! netbuf race: iter %d, pfn %lx. mfn %lx\n",
+ iter,n,pfn_type[batch]);
+ continue;
+ }
+
+ if ( last_iter && test_bit(n, to_fix ) && !test_bit(n, to_send ))
+ {
+ needed_to_fix++;
+ DPRINTF("Fix! iter %d, pfn %lx. mfn %lx\n",
+ iter,n,pfn_type[batch]);
+ }
+
+ clear_bit( n, to_fix );
+
+ batch++;
+ }
+
+ DDPRINTF("batch %d:%d (n=%d)\n",iter,batch,n);
+
+ if(batch == 0) goto skip; // vanishingly unlikely...
+
+ if ( (region_base = mfn_mapper_map_batch( xc_handle, domid,
+ PROT_READ,
+ pfn_type,
+ batch )) == 0)
+ {
+ PERROR("map batch failed");
+ goto out;
+ }
+
+ if ( get_pfn_type_batch(xc_handle, domid, batch, pfn_type) )
+ {
+ ERROR("get_pfn_type_batch failed");
+ goto out;
+ }
+
+ for( j = 0; j < batch; j++ )
+ {
+ if( (pfn_type[j] & LTAB_MASK) == XTAB)
+ {
+ DDPRINTF("type fail: page %i mfn %08lx\n",j,pfn_type[j]);
+ continue;
+ }
+
+ if(0 && debug)
+ fprintf(stderr,"%d pfn= %08lx mfn= %08lx [mfn]= %08lx sum= %08lx\n",
+ iter,
+ (pfn_type[j] & LTAB_MASK) | pfn_batch[j],
+ pfn_type[j],
+ live_mfn_to_pfn_table[pfn_type[j]&(~LTAB_MASK)],
+ csum_page(region_base + (PAGE_SIZE*j))
+ );
+
+ /* canonicalise mfn->pfn */
+ pfn_type[j] = (pfn_type[j] & LTAB_MASK) |
+ pfn_batch[j];
+ //live_mfn_to_pfn_table[pfn_type[j]&~LTAB_MASK];
+
+ }
+
+
+ if ( (*writerfn)(writerst, &batch, sizeof(int) ) )
+ {
+ ERROR("Error when writing to state file (2)");
+ goto out;
+ }
+
+ if ( (*writerfn)(writerst, pfn_type, sizeof(unsigned long)*j ) )
+ {
+ ERROR("Error when writing to state file (3)");
+ goto out;
+ }
+
+ /* entering this loop, pfn_type is now in pfns (Not mfns) */
+ for( j = 0; j < batch; j++ )
+ {
+ /* write out pages in batch */
+
+ if( (pfn_type[j] & LTAB_MASK) == XTAB)
+ {
+ DDPRINTF("SKIP BOGUS page %i mfn %08lx\n",j,pfn_type[j]);
+ continue;
+ }
+
+ if ( ((pfn_type[j] & LTAB_MASK) == L1TAB) ||
+ ((pfn_type[j] & LTAB_MASK) == L2TAB) )
+ {
+
+ memcpy(page, region_base + (PAGE_SIZE*j), PAGE_SIZE);
+
+ for ( k = 0;
+ k < (((pfn_type[j] & LTAB_MASK) == L2TAB) ?
+ (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT) : 1024);
+ k++ )
+ {
+ unsigned long pfn;
+
+ if ( !(page[k] & _PAGE_PRESENT) ) continue;
+ mfn = page[k] >> PAGE_SHIFT;
+ pfn = live_mfn_to_pfn_table[mfn];
+
+ if ( !MFN_IS_IN_PSEUDOPHYS_MAP(mfn) )
+ {
+ // I don't think this should ever happen
+
+ printf("FNI %d : [%08lx,%d] pte=%08lx, mfn=%08lx, pfn=%08lx [mfn]=%08lx\n",
+ j, pfn_type[j], k,
+ page[k], mfn, live_mfn_to_pfn_table[mfn],
+ (live_mfn_to_pfn_table[mfn]<nr_pfns)?
+ live_pfn_to_mfn_table[live_mfn_to_pfn_table[mfn]]: 0xdeadbeef);
+
+ pfn = 0; // be suspicious, very suspicious
+
+ //goto out; // let's try our luck
+
+
+ }
+ page[k] &= PAGE_SIZE - 1;
+ page[k] |= pfn << PAGE_SHIFT;
+
+ #if 0
+ printf("L%d i=%d pfn=%d mfn=%d k=%d pte=%08lx xpfn=%d\n",
+ pfn_type[j]>>28,
+ j,i,mfn,k,page[k],page[k]>>PAGE_SHIFT);
+ #endif
+
+ } /* end of page table rewrite for loop */
+
+ if ( (*writerfn)(writerst, page, PAGE_SIZE) )
+ {
+ ERROR("Error when writing to state file (4)");
+ goto out;
+ }
+
+ } /* end of it's a PT page */
+ else
+ { /* normal page */
+
+ if ( (*writerfn)(writerst, region_base + (PAGE_SIZE*j), PAGE_SIZE) )
+ {
+ ERROR("Error when writing to state file (5)");
+ goto out;
+ }
+ }
+ } /* end of the write out for this batch */
+
+ sent_this_iter += batch;
+
+ } /* end of this while loop for this iteration */
+
+ munmap(region_base, batch*PAGE_SIZE);
- iter++;
- sent_this_iter = 0;
- skip_this_iter = 0;
- prev_pc = 0;
- N=0;
+ skip:
- verbose_printf("Saving memory pages: iter %d 0%%", iter);
+ total_sent += sent_this_iter;
- while( N < nr_pfns )
- {
- unsigned int this_pc = (N * 100) / nr_pfns;
-
- if ( (this_pc - prev_pc) >= 5 )
- {
- verbose_printf("\b\b\b\b%3d%%", this_pc);
- prev_pc = this_pc;
- }
-
- /* slightly wasteful to peek the whole array evey time,
- but this is fast enough for the moment. */
-
- if ( !last_iter &&
- xc_shadow_control(xc_handle, domid,
- DOM0_SHADOW_CONTROL_OP_PEEK,
- to_skip, nr_pfns, NULL, NULL) != nr_pfns )
- {
- ERROR("Error peeking shadow bitmap");
- goto out;
- }
-
+ verbose_printf("\r %d: sent %d, skipped %d, ",
+ iter, sent_this_iter, skip_this_iter );
- /* load pfn_type[] with the mfn of all the pages we're doing in
- this batch. */
-
- for( batch = 0; batch < BATCH_SIZE && N < nr_pfns ; N++ )
- {
- int n = permute(N, nr_pfns, order_nr );
-
- if(0 && debug)
- fprintf(stderr,"%d pfn= %08lx mfn= %08lx %d "
- "[mfn]= %08lx\n",
- iter, n, live_pfn_to_mfn_table[n],
- test_bit(n,to_send),
- live_mfn_to_pfn_table[
- live_pfn_to_mfn_table[n]&0xFFFFF]);
-
- if (!last_iter && test_bit(n, to_send) && test_bit(n, to_skip))
- skip_this_iter++; /* stats keeping */
-
- if (! ( (test_bit(n, to_send) && !test_bit(n, to_skip)) ||
- (test_bit(n, to_send) && last_iter) ||
- (test_bit(n, to_fix) && last_iter) ) )
- continue;
-
- /* we get here if:
- 1. page is marked to_send & hasn't already been re-dirtied
- 2. (ignore to_skip in last iteration)
- 3. add in pages that still need fixup (net bufs)
- */
-
- pfn_batch[batch] = n;
- pfn_type[batch] = live_pfn_to_mfn_table[n];
-
- if( pfn_type[batch] == 0x80000004 )
- {
- /* not currently in pusedo-physical map -- set bit
- in to_fix that we must send this page in last_iter
- unless its sent sooner anyhow */
-
- set_bit( n, to_fix );
- if( iter>1 )
- DDPRINTF("Urk! netbuf race: iter %d, pfn %lx."
- " mfn %lx\n",
- iter,n,pfn_type[batch]);
- continue;
- }
-
- if ( last_iter && test_bit(n, to_fix) &&
- !test_bit(n, to_send) )
- {
- needed_to_fix++;
- DPRINTF("Fix! iter %d, pfn %lx. mfn %lx\n",
- iter,n,pfn_type[batch]);
- }
-
- clear_bit( n, to_fix );
-
- batch++;
- }
-
- DDPRINTF("batch %d:%d (n=%d)\n",iter,batch,n);
-
- if ( batch == 0 )
- goto skip; /* very unlikely */
-
- if ( (region_base = mfn_mapper_map_batch(xc_handle, domid,
- PROT_READ,
- pfn_type,
- batch)) == 0 )
- {
- PERROR("map batch failed");
- goto out;
- }
-
- if ( get_pfn_type_batch(xc_handle, domid, batch, pfn_type) )
- {
- ERROR("get_pfn_type_batch failed");
- goto out;
- }
-
- for ( j = 0; j < batch; j++ )
- {
- if ( (pfn_type[j] & LTAB_MASK) == XTAB )
- {
- DDPRINTF("type fail: page %i mfn %08lx\n",j,pfn_type[j]);
- continue;
- }
-
- if ( 0 && debug )
- fprintf(stderr,"%d pfn= %08lx mfn= %08lx "
- "[mfn]= %08lx sum= %08lx\n",
- iter,
- (pfn_type[j] & LTAB_MASK) | pfn_batch[j],
- pfn_type[j],
- live_mfn_to_pfn_table[pfn_type[j]&(~LTAB_MASK)],
- csum_page(region_base + (PAGE_SIZE*j))
- );
-
- /* canonicalise mfn->pfn */
- pfn_type[j] = (pfn_type[j] & LTAB_MASK) |
- pfn_batch[j];
- }
+ if ( last_iter )
+ {
+ print_stats( xc_handle, domid, sent_this_iter, &stats, 1);
-
- if ( (*writerfn)(writerst, &batch, sizeof(int) ) )
- {
- ERROR("Error when writing to state file (2)");
- goto out;
- }
-
- if ( (*writerfn)(writerst, pfn_type, sizeof(unsigned long)*j ) )
- {
- ERROR("Error when writing to state file (3)");
- goto out;
- }
-
- /* entering this loop, pfn_type is now in pfns (Not mfns) */
- for( j = 0; j < batch; j++ )
- {
- /* write out pages in batch */
-
- if( (pfn_type[j] & LTAB_MASK) == XTAB)
- {
- DDPRINTF("SKIP BOGUS page %i mfn %08lx\n",j,pfn_type[j]);
- continue;
- }
-
- if ( ((pfn_type[j] & LTAB_MASK) == L1TAB) ||
- ((pfn_type[j] & LTAB_MASK) == L2TAB) )
- {
-
- memcpy(page, region_base + (PAGE_SIZE*j), PAGE_SIZE);
-
- for ( k = 0;
- k < (((pfn_type[j] & LTAB_MASK) == L2TAB) ?
- (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT) :
- 1024);
- k++ )
- {
- unsigned long pfn;
-
- if ( !(page[k] & _PAGE_PRESENT) ) continue;
- mfn = page[k] >> PAGE_SHIFT;
- pfn = live_mfn_to_pfn_table[mfn];
-
- if ( !MFN_IS_IN_PSEUDOPHYS_MAP(mfn) )
- {
- /* I don't think this should ever happen */
- printf("FNI %d : [%08lx,%d] pte=%08lx, "
- "mfn=%08lx, pfn=%08lx [mfn]=%08lx\n",
- j, pfn_type[j], k,
- page[k], mfn, live_mfn_to_pfn_table[mfn],
- (live_mfn_to_pfn_table[mfn]<nr_pfns)?
- live_pfn_to_mfn_table[
- live_mfn_to_pfn_table[mfn]]:0xdeadbeef);
- pfn = 0; /* be suspicious, very suspicious */
- }
- page[k] &= PAGE_SIZE - 1;
- page[k] |= pfn << PAGE_SHIFT;
- } /* end of page table rewrite for loop */
-
- if ( (*writerfn)(writerst, page, PAGE_SIZE) )
- {
- ERROR("Error when writing to state file (4)");
- goto out;
- }
-
- } /* end of it's a PT page */
- else
- { /* normal page */
-
- if ( (*writerfn)(writerst, region_base + (PAGE_SIZE*j),
- PAGE_SIZE) )
- {
- ERROR("Error when writing to state file (5)");
- goto out;
- }
- }
- } /* end of the write out for this batch */
-
- sent_this_iter += batch;
+ verbose_printf("Total pages sent= %d (%.2fx)\n",
+ total_sent, ((float)total_sent)/nr_pfns );
+ verbose_printf("(of which %d were fixups)\n", needed_to_fix );
+ }
- } /* end of this while loop for this iteration */
+ if ( debug && last_iter )
+ {
+ int minusone = -1;
+ memset( to_send, 0xff, (nr_pfns+8)/8 );
+ debug = 0;
+ printf("Entering debug resend-all mode\n");
+
+ /* send "-1" to put receiver into debug mode */
+ if ( (*writerfn)(writerst, &minusone, sizeof(int)) )
+ {
+ ERROR("Error when writing to state file (6)");
+ goto out;
+ }
- munmap(region_base, batch*PAGE_SIZE);
+ continue;
+ }
- skip:
+ if ( last_iter )
+ break;
- total_sent += sent_this_iter;
+ if ( live )
+ {
+ if (
+ // ( sent_this_iter > (sent_last_iter * 0.95) ) ||
+ (iter >= max_iters) ||
+ (sent_this_iter+skip_this_iter < 50) ||
+ (total_sent > nr_pfns*max_factor) )
+ {
+ DPRINTF("Start last iteration\n");
+ last_iter = 1;
- verbose_printf("\r %d: sent %d, skipped %d, ",
- iter, sent_this_iter, skip_this_iter );
- xc_domain_stop_sync( xc_handle, domid, &op, NULL );
++ xc_domain_pause( xc_handle, domid, &op, NULL );
- if ( last_iter )
- {
- track_cpu_usage( xc_handle, domid, 0, sent_this_iter, 0, 1);
+ }
- verbose_printf("Total pages sent= %d (%.2fx)\n",
- total_sent, ((float)total_sent)/nr_pfns );
- verbose_printf("(of which %d were fixups)\n", needed_to_fix );
- }
+ if ( xc_shadow_control( xc_handle, domid,
+ DOM0_SHADOW_CONTROL_OP_CLEAN2,
+ to_send, nr_pfns, &stats ) != nr_pfns )
+ {
+ ERROR("Error flushing shadow PT");
+ goto out;
+ }
- if ( debug && last_iter )
- {
- int minusone = -1;
- memset( to_send, 0xff, nr_pfns/8 );
- debug = 0;
- printf("Entering debug resend-all mode\n");
-
- /* send "-1" to put receiver into debug mode */
- if ( (*writerfn)(writerst, &minusone, sizeof(int)) )
- {
- ERROR("Error when writing to state file (6)");
- goto out;
- }
-
- continue;
- }
+ sent_last_iter = sent_this_iter;
- if ( last_iter )
- break;
+ print_stats( xc_handle, domid, sent_this_iter, &stats, 1);
+
+ }
- if ( live )
- {
- if ( (iter >= max_iters) ||
- (sent_this_iter+skip_this_iter < 50) ||
- (total_sent > nr_pfns*max_factor) )
- {
- DPRINTF("Start last iteration\n");
- last_iter = 1;
-
- xc_domain_pause(xc_handle, domid);
- }
-
- if ( xc_shadow_control( xc_handle, domid,
- DOM0_SHADOW_CONTROL_OP_CLEAN2,
- to_send, nr_pfns, &faults_this_iter,
- &dirtied_this_iter) != nr_pfns )
- {
- ERROR("Error flushing shadow PT");
- goto out;
- }
-
- sent_last_iter = sent_this_iter;
-
- /* dirtied_this_iter = count_bits( nr_pfns, to_send ); */
- track_cpu_usage( xc_handle, domid, faults_this_iter,
- sent_this_iter, dirtied_this_iter, 1);
-
- }
} /* end of while 1 */
op.u.getdomaininfo.domain = (domid_t)domid;
op.u.getdomaininfo.ctxt = &ctxt;
if ( (do_dom0_op(xc_handle, &op) < 0) ||
- ((u32)op.u.getdomaininfo.domain != domid) )
- ((u64)op.u.getdomaininfo.domain != domid) )
++ ((u32)op.u.getdomaininfo.domain != domid) )
{
- PERROR("Could not get info on domain");
- goto out;
+ PERROR("Could not get info on domain");
+ goto out;
}
/* Canonicalise the suspend-record frame number. */
__scan_shadow_table( m, TABLE_OP_FREE_L1 );
send_bitmap:
- sc->stats.fault_count = p->mm.shadow_fault_count;
- sc->stats.dirty_count = p->mm.shadow_dirty_count;
- sc->stats.dirty_net_count = p->mm.shadow_dirty_net_count;
- sc->stats.dirty_block_count = p->mm.shadow_dirty_block_count;
-
- p->mm.shadow_fault_count = 0;
- p->mm.shadow_dirty_count = 0;
- p->mm.shadow_dirty_net_count = 0;
- p->mm.shadow_dirty_block_count = 0;
++ sc->stats.fault_count = d->mm.shadow_fault_count;
++ sc->stats.dirty_count = d->mm.shadow_dirty_count;
++ sc->stats.dirty_net_count = d->mm.shadow_dirty_net_count;
++ sc->stats.dirty_block_count = d->mm.shadow_dirty_block_count;
++
++ d->mm.shadow_fault_count = 0;
++ d->mm.shadow_dirty_count = 0;
++ d->mm.shadow_dirty_net_count = 0;
++ d->mm.shadow_dirty_block_count = 0;
+
- sc->pages = p->tot_pages;
++ sc->pages = d->tot_pages;
- if( p->tot_pages > sc->pages ||
- !sc->dirty_bitmap || !p->mm.shadow_dirty_bitmap )
+ if( d->tot_pages > sc->pages ||
+ !sc->dirty_bitmap || !d->mm.shadow_dirty_bitmap )
{
rc = -EINVAL;
goto out;
case DOM0_SHADOW_CONTROL_OP_PEEK:
{
int i;
+
+ sc->stats.fault_count = p->mm.shadow_fault_count;
+ sc->stats.dirty_count = p->mm.shadow_dirty_count;
+ sc->stats.dirty_net_count = p->mm.shadow_dirty_net_count;
+ sc->stats.dirty_block_count = p->mm.shadow_dirty_block_count;
- if( p->tot_pages > sc->pages ||
- !sc->dirty_bitmap || !p->mm.shadow_dirty_bitmap )
+ if( d->tot_pages > sc->pages ||
+ !sc->dirty_bitmap || !d->mm.shadow_dirty_bitmap )
{
rc = -EINVAL;
goto out;
--- /dev/null
+/*
+ * include/asm-x86/processor.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ */
+
+#ifndef __ASM_X86_PROCESSOR_H
+#define __ASM_X86_PROCESSOR_H
+
+#include <asm/page.h>
+#include <asm/types.h>
+#include <asm/cpufeature.h>
+#include <asm/desc.h>
+#include <asm/flushtlb.h>
+#include <asm/pdb.h>
+#include <xen/config.h>
+#include <xen/spinlock.h>
+#include <hypervisor-ifs/hypervisor-if.h>
+
+struct domain;
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#ifdef __x86_64__
+#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
+#else
+#define current_text_addr() \
+ ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
+#endif
+
+/*
+ * CPU type and hardware bug flags. Kept separately for each CPU.
+ * Members of this structure are referenced in head.S, so think twice
+ * before touching them. [mj]
+ */
+
+struct cpuinfo_x86 {
+ __u8 x86; /* CPU family */
+ __u8 x86_vendor; /* CPU vendor */
+ __u8 x86_model;
+ __u8 x86_mask;
+ int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
+ __u32 x86_capability[NCAPINTS];
+ char x86_vendor_id[16];
+ int x86_cache_size; /* in KB - for CPUS that support this call */
+ int x86_clflush_size;
+ int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined */
+} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+
+#define X86_VENDOR_INTEL 0
+#define X86_VENDOR_CYRIX 1
+#define X86_VENDOR_AMD 2
+#define X86_VENDOR_UMC 3
+#define X86_VENDOR_NEXGEN 4
+#define X86_VENDOR_CENTAUR 5
+#define X86_VENDOR_RISE 6
+#define X86_VENDOR_TRANSMETA 7
+#define X86_VENDOR_NSC 8
+#define X86_VENDOR_SIS 9
+#define X86_VENDOR_UNKNOWN 0xff
+
+/*
+ * capabilities of CPUs
+ */
+
+extern struct cpuinfo_x86 boot_cpu_data;
+extern struct tss_struct init_tss[NR_CPUS];
+
+#ifdef CONFIG_SMP
+extern struct cpuinfo_x86 cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#else
+#define cpu_data (&boot_cpu_data)
+#define current_cpu_data boot_cpu_data
+#endif
+
+extern char ignore_irq13;
+
+extern void identify_cpu(struct cpuinfo_x86 *);
+extern void print_cpu_info(struct cpuinfo_x86 *);
+extern void dodgy_tsc(void);
+
+/*
+ * EFLAGS bits
+ */
+#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
+#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
+#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
+#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
+#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
+#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
+#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
+#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
+#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
+#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
+#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
+#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
+#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
+#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
+#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
+#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
+#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
+
+/*
+ * Generic CPUID function
+ */
+static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
+{
+ __asm__("cpuid"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "0" (op));
+}
+
+/*
+ * CPUID functions returning a single datum
+ */
+static inline unsigned int cpuid_eax(unsigned int op)
+{
+ unsigned int eax;
+
+ __asm__("cpuid"
+ : "=a" (eax)
+ : "0" (op)
+ : "bx", "cx", "dx");
+ return eax;
+}
+static inline unsigned int cpuid_ebx(unsigned int op)
+{
+ unsigned int eax, ebx;
+
+ __asm__("cpuid"
+ : "=a" (eax), "=b" (ebx)
+ : "0" (op)
+ : "cx", "dx" );
+ return ebx;
+}
+static inline unsigned int cpuid_ecx(unsigned int op)
+{
+ unsigned int eax, ecx;
+
+ __asm__("cpuid"
+ : "=a" (eax), "=c" (ecx)
+ : "0" (op)
+ : "bx", "dx" );
+ return ecx;
+}
+static inline unsigned int cpuid_edx(unsigned int op)
+{
+ unsigned int eax, edx;
+
+ __asm__("cpuid"
+ : "=a" (eax), "=d" (edx)
+ : "0" (op)
+ : "bx", "cx");
+ return edx;
+}
+
+
+/*
+ * Intel CPU flags in CR0
+ */
+#define X86_CR0_PE 0x00000001 /* Enable Protected Mode (RW) */
+#define X86_CR0_MP 0x00000002 /* Monitor Coprocessor (RW) */
+#define X86_CR0_EM 0x00000004 /* Require FPU Emulation (RO) */
+#define X86_CR0_TS 0x00000008 /* Task Switched (RW) */
+#define X86_CR0_NE 0x00000020 /* Numeric Error Reporting (RW) */
+#define X86_CR0_WP 0x00010000 /* Supervisor Write Protect (RW) */
+#define X86_CR0_AM 0x00040000 /* Alignment Checking (RW) */
+#define X86_CR0_NW 0x20000000 /* Not Write-Through (RW) */
+#define X86_CR0_CD 0x40000000 /* Cache Disable (RW) */
+#define X86_CR0_PG 0x80000000 /* Paging (RW) */
+
+#define read_cr0() ({ \
+ unsigned long __dummy; \
+ __asm__( \
+ "mov"__OS" %%cr0,%0\n\t" \
+ :"=r" (__dummy)); \
+ __dummy; \
+})
+
+#define write_cr0(x) \
+ __asm__("mov"__OS" %0,%%cr0": :"r" (x));
+
+
+/*
+ * Intel CPU features in CR4
+ */
+#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
+#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
+#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
+#define X86_CR4_DE 0x0008 /* enable debugging extensions */
+#define X86_CR4_PSE 0x0010 /* enable page size extensions */
+#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
+#define X86_CR4_MCE 0x0040 /* Machine check enable */
+#define X86_CR4_PGE 0x0080 /* enable global pages */
+#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
+#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
+#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
+
+/*
+ * Save the cr4 feature set we're using (ie
+ * Pentium 4MB enable and PPro Global page
+ * enable), so that any CPU's that boot up
+ * after us can get the correct flags.
+ */
+extern unsigned long mmu_cr4_features;
+
+static inline void set_in_cr4 (unsigned long mask)
+{
+ mmu_cr4_features |= mask;
+ __asm__("mov"__OS" %%cr4,%%"__OP"ax\n\t"
+ "or"__OS" %0,%%"__OP"ax\n\t"
+ "mov"__OS" %%"__OP"ax,%%cr4\n"
+ : : "irg" (mask)
+ :"ax");
+}
+
+static inline void clear_in_cr4 (unsigned long mask)
+{
+    mmu_cr4_features &= ~mask;
+    /* Use the bare "mov" mnemonic so the __OS size suffix is appended
+     * correctly ("movl"/"movq"); "movl"__OS" would expand to the invalid
+     * mnemonic "movll"/"movlq" (cf. set_in_cr4 above, which is correct). */
+    __asm__("mov"__OS" %%cr4,%%"__OP"ax\n\t"
+            "and"__OS" %0,%%"__OP"ax\n\t"
+            "mov"__OS" %%"__OP"ax,%%cr4\n"
+            : : "irg" (~mask)
+            :"ax");
+}
+
+/*
+ * Size of io_bitmap in longwords:
+ * For Xen we support the full 8kbyte IO bitmap but use the io_bitmap_sel field
+ * to avoid a full 8kbyte copy when switching to domains with bits cleared.
+ */
+#define IO_BITMAP_SIZE 2048
+#define IO_BITMAP_BYTES (IO_BITMAP_SIZE * 4)
+#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
+#define INVALID_IO_BITMAP_OFFSET 0x8000
+
+struct i387_state {
+ u8 state[512]; /* big enough for FXSAVE */
+} __attribute__ ((aligned (16)));
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+struct tss_struct {
+ unsigned short back_link,__blh;
+#ifdef __x86_64__
+ u64 rsp0;
+ u64 rsp1;
+ u64 rsp2;
+ u64 reserved1;
+ u64 ist[7];
+ u64 reserved2;
+ u16 reserved3;
+#else
+ u32 esp0;
+ u16 ss0,__ss0h;
+ u32 esp1;
+ u16 ss1,__ss1h;
+ u32 esp2;
+ u16 ss2,__ss2h;
+ u32 __cr3;
+ u32 eip;
+ u32 eflags;
+ u32 eax,ecx,edx,ebx;
+ u32 esp;
+ u32 ebp;
+ u32 esi;
+ u32 edi;
+ u16 es, __esh;
+ u16 cs, __csh;
+ u16 ss, __ssh;
+ u16 ds, __dsh;
+ u16 fs, __fsh;
+ u16 gs, __gsh;
+ u16 ldt, __ldth;
+ u16 trace;
+#endif
+ u16 bitmap;
+ u32 io_bitmap[IO_BITMAP_SIZE+1];
+ /* Pads the TSS to be cacheline-aligned (total size is 0x2080). */
+ u32 __cacheline_filler[5];
+};
+
+struct thread_struct {
+ unsigned long guestos_sp;
+ unsigned long guestos_ss;
+/* Hardware debugging registers */
+ unsigned long debugreg[8]; /* %%db0-7 debug registers */
+/* floating point info */
+ struct i387_state i387;
+/* Trap info. */
+#ifdef __i386__
+ int fast_trap_idx;
+ struct desc_struct fast_trap_desc;
+#endif
+ trap_info_t traps[256];
+};
+
+#define IDT_ENTRIES 256
+extern struct desc_struct idt_table[];
+extern struct desc_struct *idt_tables[];
+
+#if defined(__i386__)
+
+#define SET_DEFAULT_FAST_TRAP(_p) \
+ (_p)->fast_trap_idx = 0x20; \
+ (_p)->fast_trap_desc.a = 0; \
+ (_p)->fast_trap_desc.b = 0;
+
+#define CLEAR_FAST_TRAP(_p) \
+ (memset(idt_tables[smp_processor_id()] + (_p)->fast_trap_idx, \
+ 0, 8))
+
+#ifdef XEN_DEBUGGER
+#define SET_FAST_TRAP(_p) \
+ (pdb_initialized ? (void *) 0 : \
+ (memcpy(idt_tables[smp_processor_id()] + (_p)->fast_trap_idx, \
+ &((_p)->fast_trap_desc), 8)))
+#else
+#define SET_FAST_TRAP(_p) \
+ (memcpy(idt_tables[smp_processor_id()] + (_p)->fast_trap_idx, \
+ &((_p)->fast_trap_desc), 8))
+#endif
+
+long set_fast_trap(struct domain *p, int idx);
+
+#define INIT_THREAD { \
+ 0, 0, \
+ { [0 ... 7] = 0 }, /* debugging registers */ \
+ { { 0, }, }, /* 387 state */ \
+ 0x20, { 0, 0 }, /* DEFAULT_FAST_TRAP */ \
+ { {0} } /* io permissions */ \
+}
+
+#elif defined(__x86_64__)
+
+#define INIT_THREAD { 0 }
+
+#endif /* __x86_64__ */
+
+struct mm_struct {
+ /*
+ * Every domain has a L1 pagetable of its own. Per-domain mappings
+ * are put in this table (eg. the current GDT is mapped here).
+ */
+ l1_pgentry_t *perdomain_pt;
+ pagetable_t pagetable;
+
+ /* shadow mode status and controls */
+ unsigned int shadow_mode; /* flags to control shadow table operation */
+ pagetable_t shadow_table;
+ spinlock_t shadow_lock;
+ unsigned int shadow_max_page_count; // currently unused
+
+ /* shadow hashtable */
+ struct shadow_status *shadow_ht;
+ struct shadow_status *shadow_ht_free;
+ struct shadow_status *shadow_ht_extras; /* extra allocation units */
+ unsigned int shadow_extras_count;
+
+ /* shadow dirty bitmap */
+ unsigned long *shadow_dirty_bitmap;
+ unsigned int shadow_dirty_bitmap_size; /* in pages, bit per page */
+
+ /* shadow mode stats */
+ unsigned int shadow_page_count;
+ unsigned int shadow_fault_count;
+ unsigned int shadow_dirty_count;
++ unsigned int shadow_dirty_net_count;
++ unsigned int shadow_dirty_block_count;
+
+ /* Current LDT details. */
+ unsigned long ldt_base, ldt_ents, shadow_ldt_mapcnt;
+ /* Next entry is passed to LGDT on domain switch. */
+ char gdt[10]; /* NB. 10 bytes needed for x86_64. Use 6 bytes for x86_32. */
+};
+
+static inline void write_ptbase(struct mm_struct *mm)
+{
+ unsigned long pa;
+
+ if ( unlikely(mm->shadow_mode) )
+ pa = pagetable_val(mm->shadow_table);
+ else
+ pa = pagetable_val(mm->pagetable);
+
+ __asm__ __volatile__ ( "mov"__OS" %0, %%cr3" : : "r" (pa) : "memory" );
+}
+
+#define IDLE0_MM \
+{ \
+ perdomain_pt: 0, \
+ pagetable: mk_pagetable(__pa(idle_pg_table)) \
+}
+
+/* Convenient accessor for mm.gdt. */
+#define SET_GDT_ENTRIES(_p, _e) ((*(u16 *)((_p)->mm.gdt + 0)) = (_e))
+#define SET_GDT_ADDRESS(_p, _a) ((*(unsigned long *)((_p)->mm.gdt + 2)) = (_a))
+#define GET_GDT_ENTRIES(_p) ((*(u16 *)((_p)->mm.gdt + 0)))
+#define GET_GDT_ADDRESS(_p) ((*(unsigned long *)((_p)->mm.gdt + 2)))
+
+long set_gdt(struct domain *p,
+ unsigned long *frames,
+ unsigned int entries);
+
+long set_debugreg(struct domain *p, int reg, unsigned long value);
+
+struct microcode {
+ unsigned int hdrver;
+ unsigned int rev;
+ unsigned int date;
+ unsigned int sig;
+ unsigned int cksum;
+ unsigned int ldrver;
+ unsigned int pf;
+ unsigned int reserved[5];
+ unsigned int bits[500];
+};
+
+/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
+#define MICROCODE_IOCFREE _IO('6',0)
+
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+static inline void rep_nop(void)
+{
+ __asm__ __volatile__("rep;nop");
+}
+
+#define cpu_relax() rep_nop()
+
+/* Prefetch instructions for Pentium III and AMD Athlon */
+#ifdef CONFIG_MPENTIUMIII
+
+#define ARCH_HAS_PREFETCH
+extern inline void prefetch(const void *x)
+{
+ __asm__ __volatile__ ("prefetchnta (%0)" : : "r"(x));
+}
+
+#elif CONFIG_X86_USE_3DNOW
+
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+extern inline void prefetch(const void *x)
+{
+ __asm__ __volatile__ ("prefetch (%0)" : : "r"(x));
+}
+
+extern inline void prefetchw(const void *x)
+{
+ __asm__ __volatile__ ("prefetchw (%0)" : : "r"(x));
+}
+#define spin_lock_prefetch(x) prefetchw(x)
+
+#endif
+
+#endif /* __ASM_X86_PROCESSOR_H */
#define DOM0_SHADOW_CONTROL_OP_CLEAN 11
#define DOM0_SHADOW_CONTROL_OP_PEEK 12
#define DOM0_SHADOW_CONTROL_OP_CLEAN2 13
-typedef struct dom0_shadow_control_stats_st
+
- unsigned long fault_count;
- unsigned long dirty_count;
- unsigned long dirty_net_count;
- unsigned long dirty_block_count;
++typedef struct dom0_shadow_control
+ {
-typedef struct dom0_shadow_control_st
-{
++ u32 fault_count;
++ u32 dirty_count;
++ u32 dirty_net_count;
++ u32 dirty_block_count;
+ } dom0_shadow_control_stats_t;
+
+typedef struct {
/* IN variables. */
- domid_t domain;
- int op;
- unsigned long *dirty_bitmap; // pointe to mlocked buffer
- /* IN/OUT variables */
- unsigned long pages; // size of buffer, updated with actual size
- /* OUT varaibles */
- dom0_shadow_control_stats_t stats;
-} dom0_shadow_control_t;
+ domid_t domain; /* 0 */
+ u32 op; /* 4 */
+ unsigned long *dirty_bitmap; /* 8: pointer to locked buffer */
+ MEMORY_PADDING;
+ /* IN/OUT variables. */
+ memory_t pages; /* 16: size of buffer, updated with actual size */
+ MEMORY_PADDING;
+ /* OUT variables. */
- memory_t fault_count; /* 24 */
- MEMORY_PADDING;
- memory_t dirty_count; /* 32 */
- MEMORY_PADDING;
- } PACKED dom0_shadow_control_t; /* 40 bytes */
++ dom0_shadow_control_stats_t;
++} PACKED dom0_shadow_control_t;
++
#define DOM0_SETDOMAINNAME 26
-typedef struct dom0_setdomainname_st
-{
+typedef struct {
/* IN variables. */
- domid_t domain;
- char name[MAX_DOMAIN_NAME];
-} dom0_setdomainname_t;
+ domid_t domain; /* 0 */
+ u8 name[MAX_DOMAIN_NAME]; /* 4 */
+} PACKED dom0_setdomainname_t; /* 20 bytes */
#define DOM0_SETDOMAININITIALMEM 27
-typedef struct dom0_setdomaininitialmem_st
-{
+typedef struct {
/* IN variables. */
- domid_t domain;
- unsigned int initial_memkb; /* use before domain is built */
-} dom0_setdomaininitialmem_t;
+ domid_t domain; /* 0 */
+ u32 __pad;
+ memory_t initial_memkb; /* 8 */
+ MEMORY_PADDING;
+} PACKED dom0_setdomaininitialmem_t; /* 16 bytes */
#define DOM0_SETDOMAINMAXMEM 28
-typedef struct dom0_setdomainmaxmem_st
-{
+typedef struct {
/* IN variables. */
- domid_t domain;
- unsigned int max_memkb;
-} dom0_setdomainmaxmem_t;
+ domid_t domain; /* 0 */
+ u32 __pad;
+ memory_t max_memkb; /* 8 */
+ MEMORY_PADDING;
+} PACKED dom0_setdomainmaxmem_t; /* 16 bytes */
#define DOM0_GETPAGEFRAMEINFO2 29 /* batched interface */
-typedef struct dom0_getpageframeinfo2_st
-{
+typedef struct {
/* IN variables. */
- domid_t domain; /* To which domain do frames belong? */
- int num;
+ domid_t domain; /* 0 */
+ u32 __pad;
+ memory_t num; /* 8 */
+ MEMORY_PADDING;
/* IN/OUT variables. */
- unsigned long *array;
-} dom0_getpageframeinfo2_t;
-
-
-typedef struct dom0_op_st
-{
- unsigned long cmd;
- unsigned long interface_version; /* DOM0_INTERFACE_VERSION */
- union
- {
- unsigned long dummy[4];
- dom0_createdomain_t createdomain;
- dom0_startdomain_t startdomain;
- dom0_stopdomain_t stopdomain;
- dom0_destroydomain_t destroydomain;
- dom0_getmemlist_t getmemlist;
- dom0_schedctl_t schedctl;
- dom0_adjustdom_t adjustdom;
- dom0_builddomain_t builddomain;
- dom0_getdomaininfo_t getdomaininfo;
- dom0_getpageframeinfo_t getpageframeinfo;
- dom0_iopl_t iopl;
- dom0_msr_t msr;
- dom0_debug_t debug;
- dom0_settime_t settime;
- dom0_readconsole_t readconsole;
- dom0_pincpudomain_t pincpudomain;
- dom0_gettbufs_t gettbufs;
- dom0_physinfo_t physinfo;
- dom0_pcidev_access_t pcidev_access;
- dom0_sched_id_t sched_id;
- dom0_shadow_control_t shadow_control;
- dom0_setdomainname_t setdomainname;
+ unsigned long *array; /* 16 */
+ MEMORY_PADDING;
+} PACKED dom0_getpageframeinfo2_t; /* 24 bytes */
+
+typedef struct {
+ u32 cmd; /* 0 */
+ u32 interface_version; /* 4 */ /* DOM0_INTERFACE_VERSION */
+ union { /* 8 */
+ u32 dummy[18]; /* 72 bytes */
+ dom0_createdomain_t createdomain;
+ dom0_pausedomain_t pausedomain;
+ dom0_unpausedomain_t unpausedomain;
+ dom0_destroydomain_t destroydomain;
+ dom0_getmemlist_t getmemlist;
+ dom0_schedctl_t schedctl;
+ dom0_adjustdom_t adjustdom;
+ dom0_builddomain_t builddomain;
+ dom0_getdomaininfo_t getdomaininfo;
+ dom0_getpageframeinfo_t getpageframeinfo;
+ dom0_iopl_t iopl;
+ dom0_msr_t msr;
+ dom0_debug_t debug;
+ dom0_settime_t settime;
+ dom0_readconsole_t readconsole;
+ dom0_pincpudomain_t pincpudomain;
+ dom0_gettbufs_t gettbufs;
+ dom0_physinfo_t physinfo;
+ dom0_pcidev_access_t pcidev_access;
+ dom0_sched_id_t sched_id;
+ dom0_shadow_control_t shadow_control;
+ dom0_setdomainname_t setdomainname;
dom0_setdomaininitialmem_t setdomaininitialmem;
- dom0_setdomainmaxmem_t setdomainmaxmem;
+ dom0_setdomainmaxmem_t setdomainmaxmem;
dom0_getpageframeinfo2_t getpageframeinfo2;
- } u;
-} dom0_op_t;
+ } PACKED u;
+} PACKED dom0_op_t; /* 80 bytes */
#endif /* __DOM0_OPS_H__ */
/************************************************************************/
- static inline void __mark_dirty( struct mm_struct *m, unsigned int mfn )
+ static inline int __mark_dirty( struct mm_struct *m, unsigned int mfn )
{
unsigned int pfn;
+ int rc = 0;
ASSERT(spin_is_locked(&m->shadow_lock));
-
- //printk("%08x %08lx\n", mfn, machine_to_phys_mapping[mfn] );
pfn = machine_to_phys_mapping[mfn];
/* We use values with the top bit set to mark MFNs that aren't
       really part of the domain's pseudo-physical memory map e.g.
the shared info frame. Nothing to do here...
- */
+ */
- if ( unlikely(pfn & 0x80000000U) ) return;
+ if ( unlikely(pfn & 0x80000000U) ) return rc;
ASSERT(m->shadow_dirty_bitmap);
if( likely(pfn<m->shadow_dirty_bitmap_size) )
}
else
{
- extern void show_traceX(void);
- SH_LOG("mark_dirty OOR! mfn=%x pfn=%x max=%x (mm %p)",
- mfn, pfn, m->shadow_dirty_bitmap_size, m );
- SH_LOG("dom=%lld caf=%08x taf=%08x\n",
- frame_table[mfn].u.domain->domain,
- frame_table[mfn].count_and_flags,
- frame_table[mfn].type_and_flags );
- //show_traceX();
+ extern void show_traceX(void);
+ SH_LOG("mark_dirty OOR! mfn=%x pfn=%x max=%x (mm %p)",
+ mfn, pfn, m->shadow_dirty_bitmap_size, m );
+ SH_LOG("dom=%u caf=%08x taf=%08x\n",
+ frame_table[mfn].u.domain->domain,
+ frame_table[mfn].count_and_flags,
+ frame_table[mfn].type_and_flags );
}
-
+
+ return rc;
}
- static inline void mark_dirty( struct mm_struct *m, unsigned int mfn )
+ static inline int mark_dirty( struct mm_struct *m, unsigned int mfn )
-{
- int rc;
+{
++ int rc;
ASSERT(local_irq_is_enabled());
//if(spin_is_locked(&m->shadow_lock)) printk("+");
spin_lock(&m->shadow_lock);
- __mark_dirty( m, mfn );
+ rc = __mark_dirty( m, mfn );
spin_unlock(&m->shadow_lock);
- return rc;
++ return rc;
}